In [ ]:
import numpy as np
import pandas as pd
from packaging import version
import time
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as MSE
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as k
%matplotlib inline
np.set_printoptions(precision=3, suppress=True)
print("This notebook requires TensorFlow 2.0 or above")
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >=2
print("Keras version: ", keras.__version__)
This notebook requires TensorFlow 2.0 or above TensorFlow version: 2.18.0 Keras version: 3.8.0
In [ ]:
# Loading the cifar10 Dataset
(train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz 170498071/170498071 ━━━━━━━━━━━━━━━━━━━━ 15s 0us/step
In [ ]:
# Exploratory Data Analysis
print(f"train images shape`: {train_images.shape}")
print(f"train labels shape: {train_labels.shape}")
print(f"test images shape: {test_images.shape}")
print(f"test labels shape: {test_labels.shape}")
train images shape`: (50000, 32, 32, 3) train labels shape: (50000, 1) test images shape: (10000, 32, 32, 3) test labels shape: (10000, 1)
In [ ]:
# Explore the labels, labeled as a numerical digit that needs conversion
# to an item description
print(f"First 10 training labels: {train_labels[:10]}")
First 10 training labels: [[6] [9] [9] [4] [1] [1] [2] [7] [8] [3]]
In [ ]:
# Data Analysis Functions
def show_random_examples(x, y, p):
indices = np.random.choice(range(x.shape[0]), 10, replace=False)
x = x[indices]
y = y[indices]
p = p[indices]
plt.figure(figsize=(10, 5))
for i in range(10):
plt.subplot(2, 5, i + 1)
plt.imshow(x[i])
plt.xticks([])
plt.yticks([])
col = 'green' if np.argmax(y[i]) == np.argmax(p[i]) else 'red'
plt.xlabel(class_names_preview[np.argmax(p[i])], color=col)
plt.show()
def get_three_classes(x, y):
def indices_of(class_id):
indices, _ = np.where(y == float(class_id))
return indices
indices = np.concatenate([indices_of(0), indices_of(1), indices_of(2)], axis=0)
x = x[indices]
y = y[indices]
count = x.shape[0]
indices = np.random.choice(range(count), count, replace=False)
x = x[indices]
y = y[indices]
y = tf.keras.utils.to_categorical(y)
return x, y
def plot_history(history):
losses = history.history['loss']
accs = history.history['accuracy']
val_losses = history.history['val_loss']
val_accs = history.history['val_accuracy']
epochs = len(losses)
plt.figure(figsize=(16, 4))
for i, metrics in enumerate(zip([losses, accs], [val_losses, val_accs], ['Loss', 'Accuracy'])):
plt.subplot(1, 2, i + 1)
plt.plot(range(epochs), metrics[0], label='Training {}'.format(metrics[2]))
plt.plot(range(epochs), metrics[1], label='Validation {}'.format(metrics[2]))
plt.legend()
plt.show()
def display_training_curves(training, validation, title, subplot):
ax = plt.subplot(subplot)
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['training', 'validation'])
def print_validation_report(y_test, predictions):
print("Classification Report")
print(classification_report(y_test, predictions))
print('Accuracy Score: {}'.format(accuracy_score(y_test, predictions)))
print('Root Mean Square Error: {}'.format(np.sqrt(MSE(y_test, predictions))))
def plot_confusion_matrix(y_true, y_pred):
mtx = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(figsize=(16,12))
sns.heatmap(mtx, annot=True, fmt='d', linewidths=.75, cbar=False, ax=ax,cmap='Blues',linecolor='white')
# square=True,
plt.ylabel('true label')
plt.xlabel('predicted label')
In [ ]:
train_image_preview, train_label_preview = get_three_classes(train_images, train_labels)
test_image_preview, test_label_preview = get_three_classes(test_images, test_labels)
class_names_preview = ['airplane', 'car', 'bird']
show_random_examples(train_image_preview, train_label_preview, train_label_preview)
In [ ]:
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog','frog', 'horse' ,'ship' ,'truck']
In [ ]:
image_train_split, image_val_split, label_train_split, label_val_split = train_test_split(train_images, train_labels, test_size=.1, random_state=42, shuffle=True)
print(image_train_split.shape)
print(image_val_split.shape)
print(label_train_split.shape)
print(label_val_split.shape)
(45000, 32, 32, 3) (5000, 32, 32, 3) (45000, 1) (5000, 1)
In [ ]:
image_train_norm = image_train_split / 255.0
image_val_norm = image_val_split / 255.0
image_test_norm = test_images / 255.0
image_train_norm.shape
Out[ ]:
(45000, 32, 32, 3)
In [ ]:
# Build Model 1: Single layer DNN
name = 'DNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 7ms/step - accuracy: 0.3554 - loss: 2.0319 - val_accuracy: 0.4684 - val_loss: 1.5425 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.4819 - loss: 1.4975 - val_accuracy: 0.4506 - val_loss: 1.5612 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5032 - loss: 1.4394 - val_accuracy: 0.4680 - val_loss: 1.5505 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5157 - loss: 1.4038 - val_accuracy: 0.4850 - val_loss: 1.4996 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.5290 - loss: 1.3732 - val_accuracy: 0.4888 - val_loss: 1.4895 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5416 - loss: 1.3324 - val_accuracy: 0.4822 - val_loss: 1.5002 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5444 - loss: 1.3170 - val_accuracy: 0.4906 - val_loss: 1.4861 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.5632 - loss: 1.2782 - val_accuracy: 0.4928 - val_loss: 1.4754 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5702 - loss: 1.2573 - val_accuracy: 0.4902 - val_loss: 1.4849 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.5788 - loss: 1.2422 - val_accuracy: 0.4840 - val_loss: 1.5137 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5848 - loss: 1.2095 - val_accuracy: 0.4762 - val_loss: 1.5366 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.4788 - loss: 1.5209 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.56 0.42 0.48 1000 1 0.65 0.48 0.56 1000 2 0.45 0.28 0.35 1000 3 0.32 0.39 0.35 1000 4 0.48 0.31 0.37 1000 5 0.41 0.40 0.40 1000 6 0.55 0.44 0.49 1000 7 0.42 0.68 0.52 1000 8 0.50 0.71 0.58 1000 9 0.51 0.63 0.57 1000 accuracy 0.47 10000 macro avg 0.49 0.47 0.47 10000 weighted avg 0.49 0.47 0.47 10000 Accuracy Score: 0.4734 Root Mean Square Error: 3.2584658967066082
In [ ]:
# Build initial data for table
data = {}
data['model'] = ['DNN']
data['accuracy'] = [f"{history.history['accuracy'][-1]:.3f}"]
data['val_accuracy'] = [f"{history.history['val_accuracy'][-1]:.3f}"]
data['test_accuracy'] = [f"{test_pred[1]:.3f}"]
data['loss'] = [f"{history.history['loss'][-1]:.3f}"]
data['val_loss'] = [f"{history.history['val_loss'][-1]:.3f}"]
data['test_loss'] = [f"{test_pred[0]:.3f}"]
data['time'] = [f"{time_end - time_start:.3f}"]
In [ ]:
def add_to_data(data, model, history, test_pred):
data['model'].append(model)
data['accuracy'].append(f"{history.history['accuracy'][-1]:.3f}")
data['val_accuracy'].append(f"{history.history['val_accuracy'][-1]:.3f}")
data['test_accuracy'].append(f"{test_pred[1]:.3f}")
data['loss'].append(f"{history.history['loss'][-1]:.3f}")
data['val_loss'].append(f"{history.history['val_loss'][-1]:.3f}")
data['test_loss'].append(f"{test_pred[0]:.3f}")
data['time'].append(f"{time_end - time_start:.3f}")
In [ ]:
# Build Model 2: 2 Layer dense neural network
name = 'DNN_DNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.3716 - loss: 1.9046 - val_accuracy: 0.4664 - val_loss: 1.5791 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 9ms/step - accuracy: 0.4902 - loss: 1.5102 - val_accuracy: 0.4686 - val_loss: 1.5630 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.5155 - loss: 1.4308 - val_accuracy: 0.4850 - val_loss: 1.5330 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.5309 - loss: 1.3817 - val_accuracy: 0.4880 - val_loss: 1.5128 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.5414 - loss: 1.3498 - val_accuracy: 0.4922 - val_loss: 1.5005 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.5680 - loss: 1.2903 - val_accuracy: 0.4830 - val_loss: 1.5165 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 9ms/step - accuracy: 0.5759 - loss: 1.2589 - val_accuracy: 0.4782 - val_loss: 1.5242 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.5980 - loss: 1.2107 - val_accuracy: 0.4770 - val_loss: 1.5550 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.4808 - loss: 1.5365 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.50 0.58 0.54 1000 1 0.49 0.75 0.59 1000 2 0.38 0.32 0.35 1000 3 0.33 0.30 0.31 1000 4 0.44 0.37 0.40 1000 5 0.37 0.46 0.41 1000 6 0.54 0.47 0.50 1000 7 0.58 0.55 0.56 1000 8 0.60 0.56 0.58 1000 9 0.63 0.41 0.50 1000 accuracy 0.48 10000 macro avg 0.48 0.48 0.47 10000 weighted avg 0.48 0.48 0.47 10000 Accuracy Score: 0.4787 Root Mean Square Error: 3.1903604811995776
In [ ]:
# Build Model 3: Single layer CNN
name = 'CNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 8ms/step - accuracy: 0.3975 - loss: 1.8340 - val_accuracy: 0.5096 - val_loss: 1.3822 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.5877 - loss: 1.1900 - val_accuracy: 0.5668 - val_loss: 1.2588 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6375 - loss: 1.0481 - val_accuracy: 0.5906 - val_loss: 1.1787 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6861 - loss: 0.9190 - val_accuracy: 0.6164 - val_loss: 1.1404 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - accuracy: 0.7294 - loss: 0.8000 - val_accuracy: 0.6130 - val_loss: 1.1564 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.7591 - loss: 0.7025 - val_accuracy: 0.6150 - val_loss: 1.1564 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7952 - loss: 0.6162 - val_accuracy: 0.6202 - val_loss: 1.1794 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.8228 - loss: 0.5362 - val_accuracy: 0.6106 - val_loss: 1.2584 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8620 - loss: 0.4385 - val_accuracy: 0.6098 - val_loss: 1.2900 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.8804 - loss: 0.3857 - val_accuracy: 0.6040 - val_loss: 1.4168 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6016 - loss: 1.4354 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.54 0.68 0.60 1000 1 0.69 0.78 0.73 1000 2 0.58 0.31 0.41 1000 3 0.47 0.34 0.39 1000 4 0.58 0.52 0.55 1000 5 0.47 0.59 0.53 1000 6 0.80 0.57 0.67 1000 7 0.58 0.73 0.65 1000 8 0.68 0.74 0.71 1000 9 0.62 0.70 0.66 1000 accuracy 0.60 10000 macro avg 0.60 0.60 0.59 10000 weighted avg 0.60 0.60 0.59 10000 Accuracy Score: 0.5972 Root Mean Square Error: 2.802427519134081
In [ ]:
# Build Model 4: 2 layer CNN
name = 'CNN_CNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 14s 15ms/step - accuracy: 0.4004 - loss: 1.6900 - val_accuracy: 0.5746 - val_loss: 1.2167 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 13ms/step - accuracy: 0.6077 - loss: 1.1278 - val_accuracy: 0.6198 - val_loss: 1.0876 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 12ms/step - accuracy: 0.6820 - loss: 0.9228 - val_accuracy: 0.6338 - val_loss: 1.0392 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 12ms/step - accuracy: 0.7224 - loss: 0.8108 - val_accuracy: 0.6504 - val_loss: 1.0426 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 12ms/step - accuracy: 0.7570 - loss: 0.7032 - val_accuracy: 0.6476 - val_loss: 1.0337 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 12ms/step - accuracy: 0.7913 - loss: 0.6109 - val_accuracy: 0.6470 - val_loss: 1.0915 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 12ms/step - accuracy: 0.8216 - loss: 0.5271 - val_accuracy: 0.6184 - val_loss: 1.2345 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6120 - loss: 1.2551 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.60 0.70 0.65 1000 1 0.79 0.67 0.73 1000 2 0.50 0.48 0.49 1000 3 0.39 0.50 0.44 1000 4 0.58 0.54 0.56 1000 5 0.56 0.46 0.50 1000 6 0.87 0.47 0.61 1000 7 0.67 0.69 0.68 1000 8 0.65 0.81 0.72 1000 9 0.64 0.76 0.70 1000 accuracy 0.61 10000 macro avg 0.63 0.61 0.61 10000 weighted avg 0.63 0.61 0.61 10000 Accuracy Score: 0.6097 Root Mean Square Error: 2.704662640700315
In [ ]:
# Build Model 5: CNN and MaxPool 2 total layers
name = 'CNN_MP'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 8ms/step - accuracy: 0.4030 - loss: 1.6897 - val_accuracy: 0.5390 - val_loss: 1.3070 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.5778 - loss: 1.2110 - val_accuracy: 0.5794 - val_loss: 1.1983 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6297 - loss: 1.0729 - val_accuracy: 0.6050 - val_loss: 1.1350 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6539 - loss: 0.9969 - val_accuracy: 0.6270 - val_loss: 1.0829 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6834 - loss: 0.9202 - val_accuracy: 0.6408 - val_loss: 1.0442 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7093 - loss: 0.8480 - val_accuracy: 0.6382 - val_loss: 1.0599 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7212 - loss: 0.8085 - val_accuracy: 0.6428 - val_loss: 1.0391 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7472 - loss: 0.7498 - val_accuracy: 0.6360 - val_loss: 1.0625 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7618 - loss: 0.6995 - val_accuracy: 0.6476 - val_loss: 1.0274 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.7793 - loss: 0.6490 - val_accuracy: 0.6436 - val_loss: 1.0583 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7945 - loss: 0.6174 - val_accuracy: 0.6448 - val_loss: 1.0647 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.8050 - loss: 0.5742 - val_accuracy: 0.6194 - val_loss: 1.1771 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6342 - loss: 1.1847 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.60 0.68 0.64 1000 1 0.71 0.80 0.75 1000 2 0.63 0.31 0.41 1000 3 0.55 0.33 0.41 1000 4 0.56 0.63 0.59 1000 5 0.50 0.64 0.56 1000 6 0.78 0.68 0.73 1000 7 0.78 0.64 0.70 1000 8 0.57 0.87 0.69 1000 9 0.69 0.71 0.70 1000 accuracy 0.63 10000 macro avg 0.64 0.63 0.62 10000 weighted avg 0.64 0.63 0.62 10000 Accuracy Score: 0.6285 Root Mean Square Error: 2.6656706473231084
In [ ]:
# Model 6: Build Model 1 With Dropout layer
name = 'DNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 9ms/step - accuracy: 0.3584 - loss: 1.9644 - val_accuracy: 0.4404 - val_loss: 1.5949 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.4809 - loss: 1.5060 - val_accuracy: 0.4672 - val_loss: 1.5405 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.4935 - loss: 1.4557 - val_accuracy: 0.4774 - val_loss: 1.5222 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.5120 - loss: 1.4172 - val_accuracy: 0.4732 - val_loss: 1.5098 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5262 - loss: 1.3722 - val_accuracy: 0.4792 - val_loss: 1.4877 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5332 - loss: 1.3578 - val_accuracy: 0.4796 - val_loss: 1.5135 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5370 - loss: 1.3507 - val_accuracy: 0.4808 - val_loss: 1.5110 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5511 - loss: 1.3098 - val_accuracy: 0.4900 - val_loss: 1.4822 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5590 - loss: 1.2854 - val_accuracy: 0.4820 - val_loss: 1.5143 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5616 - loss: 1.2766 - val_accuracy: 0.4906 - val_loss: 1.4987 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.5698 - loss: 1.2605 - val_accuracy: 0.4894 - val_loss: 1.5020 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 5ms/step - accuracy: 0.5775 - loss: 1.2284 - val_accuracy: 0.4870 - val_loss: 1.5109 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.5862 - loss: 1.2169 - val_accuracy: 0.4894 - val_loss: 1.5293 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.4877 - loss: 1.5036 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.50 0.50 0.50 1000 1 0.62 0.55 0.59 1000 2 0.38 0.37 0.37 1000 3 0.34 0.39 0.37 1000 4 0.41 0.41 0.41 1000 5 0.49 0.28 0.36 1000 6 0.58 0.44 0.50 1000 7 0.47 0.63 0.54 1000 8 0.53 0.68 0.59 1000 9 0.56 0.58 0.57 1000 accuracy 0.48 10000 macro avg 0.49 0.48 0.48 10000 weighted avg 0.49 0.48 0.48 10000 Accuracy Score: 0.4832 Root Mean Square Error: 3.1805974281571694
In [ ]:
# Model 7: Build Model 2 with Drop out between layers
name = 'DNN_DO_DNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 12ms/step - accuracy: 0.3467 - loss: 1.9535 - val_accuracy: 0.4546 - val_loss: 1.6010 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.4762 - loss: 1.5326 - val_accuracy: 0.4738 - val_loss: 1.5392 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5014 - loss: 1.4575 - val_accuracy: 0.4858 - val_loss: 1.5045 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 9ms/step - accuracy: 0.5216 - loss: 1.4084 - val_accuracy: 0.4850 - val_loss: 1.4994 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5333 - loss: 1.3741 - val_accuracy: 0.4880 - val_loss: 1.5084 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.5410 - loss: 1.3487 - val_accuracy: 0.4882 - val_loss: 1.4876 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5578 - loss: 1.3061 - val_accuracy: 0.4860 - val_loss: 1.5056 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5678 - loss: 1.2762 - val_accuracy: 0.4988 - val_loss: 1.4883 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.5748 - loss: 1.2716 - val_accuracy: 0.4896 - val_loss: 1.4974 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5893 - loss: 1.2268 - val_accuracy: 0.4880 - val_loss: 1.5239 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.5983 - loss: 1.2010 - val_accuracy: 0.4844 - val_loss: 1.5264 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.4903 - loss: 1.5164 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.46 0.63 0.53 1000 1 0.60 0.59 0.60 1000 2 0.43 0.24 0.31 1000 3 0.33 0.38 0.35 1000 4 0.45 0.37 0.41 1000 5 0.43 0.42 0.42 1000 6 0.52 0.54 0.53 1000 7 0.56 0.53 0.54 1000 8 0.58 0.59 0.58 1000 9 0.54 0.61 0.57 1000 accuracy 0.49 10000 macro avg 0.49 0.49 0.49 10000 weighted avg 0.49 0.49 0.49 10000 Accuracy Score: 0.4904 Root Mean Square Error: 3.2068832220709256
In [ ]:
# Model 8: Build Model 3 with Dropout after Conv2D
name = 'CNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 8ms/step - accuracy: 0.3934 - loss: 1.7601 - val_accuracy: 0.5090 - val_loss: 1.3747 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 6ms/step - accuracy: 0.5729 - loss: 1.2198 - val_accuracy: 0.5552 - val_loss: 1.2663 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6209 - loss: 1.1001 - val_accuracy: 0.5692 - val_loss: 1.2171 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6522 - loss: 1.0004 - val_accuracy: 0.5854 - val_loss: 1.2273 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6870 - loss: 0.9052 - val_accuracy: 0.6046 - val_loss: 1.1787 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7214 - loss: 0.8095 - val_accuracy: 0.5958 - val_loss: 1.2153 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7469 - loss: 0.7329 - val_accuracy: 0.5966 - val_loss: 1.2354 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7750 - loss: 0.6605 - val_accuracy: 0.6110 - val_loss: 1.2249 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.8023 - loss: 0.5869 - val_accuracy: 0.6034 - val_loss: 1.2605 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.8210 - loss: 0.5269 - val_accuracy: 0.6178 - val_loss: 1.2482 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.8419 - loss: 0.4738 - val_accuracy: 0.5960 - val_loss: 1.3852 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.8554 - loss: 0.4328 - val_accuracy: 0.6146 - val_loss: 1.3631 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.8726 - loss: 0.3816 - val_accuracy: 0.6108 - val_loss: 1.4472 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.5942 - loss: 1.4806 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.68 0.57 0.62 1000 1 0.72 0.75 0.73 1000 2 0.50 0.35 0.41 1000 3 0.46 0.32 0.38 1000 4 0.51 0.57 0.54 1000 5 0.45 0.62 0.52 1000 6 0.55 0.81 0.66 1000 7 0.72 0.59 0.65 1000 8 0.70 0.74 0.72 1000 9 0.74 0.65 0.69 1000 accuracy 0.60 10000 macro avg 0.60 0.60 0.59 10000 weighted avg 0.60 0.60 0.59 10000 Accuracy Score: 0.5973 Root Mean Square Error: 2.6729571638917076
In [ ]:
# Model 9: Build Model 4 with Dropout between layers
name = 'CNN_DO_CNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 17ms/step - accuracy: 0.3853 - loss: 1.7216 - val_accuracy: 0.5258 - val_loss: 1.3543 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.5883 - loss: 1.1781 - val_accuracy: 0.5932 - val_loss: 1.1301 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 13ms/step - accuracy: 0.6587 - loss: 0.9884 - val_accuracy: 0.6214 - val_loss: 1.0881 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.6972 - loss: 0.8864 - val_accuracy: 0.6194 - val_loss: 1.0787 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7268 - loss: 0.8032 - val_accuracy: 0.6428 - val_loss: 1.0568 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.7618 - loss: 0.7012 - val_accuracy: 0.6274 - val_loss: 1.1299 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.7858 - loss: 0.6256 - val_accuracy: 0.6302 - val_loss: 1.1575 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.8085 - loss: 0.5616 - val_accuracy: 0.6230 - val_loss: 1.2161 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6201 - loss: 1.2144 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step Classification Report precision recall f1-score support 0 0.63 0.68 0.65 1000 1 0.76 0.73 0.75 1000 2 0.47 0.45 0.46 1000 3 0.42 0.45 0.44 1000 4 0.49 0.65 0.56 1000 5 0.61 0.41 0.49 1000 6 0.78 0.60 0.68 1000 7 0.67 0.69 0.68 1000 8 0.72 0.78 0.74 1000 9 0.68 0.72 0.70 1000 accuracy 0.61 10000 macro avg 0.62 0.61 0.61 10000 weighted avg 0.62 0.61 0.61 10000 Accuracy Score: 0.6148 Root Mean Square Error: 2.626937380296683
In [ ]:
# Model 10: Build Model 5 with Dropout between layers
name = 'CNN_DO_MP'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 9ms/step - accuracy: 0.4025 - loss: 1.6829 - val_accuracy: 0.5650 - val_loss: 1.2837 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5791 - loss: 1.2097 - val_accuracy: 0.5710 - val_loss: 1.2074 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6261 - loss: 1.0787 - val_accuracy: 0.6096 - val_loss: 1.1385 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6624 - loss: 0.9810 - val_accuracy: 0.6240 - val_loss: 1.0854 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 11ms/step - accuracy: 0.6889 - loss: 0.9107 - val_accuracy: 0.6368 - val_loss: 1.0525 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 7ms/step - accuracy: 0.7034 - loss: 0.8589 - val_accuracy: 0.6412 - val_loss: 1.0537 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 6ms/step - accuracy: 0.7247 - loss: 0.8020 - val_accuracy: 0.6320 - val_loss: 1.0417 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7355 - loss: 0.7623 - val_accuracy: 0.6394 - val_loss: 1.0313 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7532 - loss: 0.7183 - val_accuracy: 0.6368 - val_loss: 1.0486 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6323 - loss: 1.0574 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.56 0.78 0.65 1000 1 0.69 0.84 0.76 1000 2 0.62 0.36 0.46 1000 3 0.49 0.40 0.44 1000 4 0.66 0.45 0.53 1000 5 0.55 0.58 0.56 1000 6 0.58 0.84 0.69 1000 7 0.71 0.72 0.71 1000 8 0.81 0.65 0.72 1000 9 0.73 0.71 0.72 1000 accuracy 0.63 10000 macro avg 0.64 0.63 0.62 10000 weighted avg 0.64 0.63 0.62 10000 Accuracy Score: 0.6332 Root Mean Square Error: 2.674827097215818
In [ ]:
data_to_send = pd.DataFrame(data)
data_to_send
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
In [ ]:
# Model 11: Repeat Model 7 with Dropout after second layer
name = 'DNN_DNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.3729 - loss: 1.8605 - val_accuracy: 0.4620 - val_loss: 1.5710 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 9ms/step - accuracy: 0.4856 - loss: 1.5113 - val_accuracy: 0.4802 - val_loss: 1.5281 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5136 - loss: 1.4371 - val_accuracy: 0.4870 - val_loss: 1.5044 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5270 - loss: 1.3929 - val_accuracy: 0.4928 - val_loss: 1.4896 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5455 - loss: 1.3491 - val_accuracy: 0.4720 - val_loss: 1.5348 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.5538 - loss: 1.3246 - val_accuracy: 0.4906 - val_loss: 1.4936 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 10ms/step - accuracy: 0.5642 - loss: 1.2898 - val_accuracy: 0.4966 - val_loss: 1.4976 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.5777 - loss: 1.2601 - val_accuracy: 0.4884 - val_loss: 1.5022 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5902 - loss: 1.2289 - val_accuracy: 0.4980 - val_loss: 1.5112 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.6022 - loss: 1.2017 - val_accuracy: 0.4780 - val_loss: 1.5471 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 11ms/step - accuracy: 0.6075 - loss: 1.1731 - val_accuracy: 0.4950 - val_loss: 1.5251 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 11ms/step - accuracy: 0.6223 - loss: 1.1437 - val_accuracy: 0.4912 - val_loss: 1.5663 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.5023 - loss: 1.5354 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.49 0.60 0.54 1000 1 0.58 0.63 0.60 1000 2 0.39 0.36 0.38 1000 3 0.35 0.33 0.34 1000 4 0.41 0.39 0.40 1000 5 0.45 0.38 0.42 1000 6 0.53 0.52 0.52 1000 7 0.54 0.56 0.55 1000 8 0.62 0.58 0.60 1000 9 0.57 0.60 0.58 1000 accuracy 0.50 10000 macro avg 0.49 0.50 0.49 10000 weighted avg 0.49 0.50 0.49 10000 Accuracy Score: 0.4951 Root Mean Square Error: 3.1318684518989617
In [ ]:
# Model 12: Repeat Model 9 with Dropout after layers
name = 'CNN_CNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 18ms/step - accuracy: 0.3971 - loss: 1.6874 - val_accuracy: 0.5574 - val_loss: 1.2572 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.5913 - loss: 1.1753 - val_accuracy: 0.6130 - val_loss: 1.0983 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.6617 - loss: 0.9756 - val_accuracy: 0.6136 - val_loss: 1.0917 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.6981 - loss: 0.8729 - val_accuracy: 0.6354 - val_loss: 1.0524 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.7403 - loss: 0.7513 - val_accuracy: 0.6374 - val_loss: 1.0642 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.7654 - loss: 0.6796 - val_accuracy: 0.6426 - val_loss: 1.0352 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.7873 - loss: 0.6173 - val_accuracy: 0.6512 - val_loss: 1.0914 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.8064 - loss: 0.5554 - val_accuracy: 0.6504 - val_loss: 1.1175 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.8241 - loss: 0.5034 - val_accuracy: 0.6468 - val_loss: 1.1502 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.8377 - loss: 0.4624 - val_accuracy: 0.6486 - val_loss: 1.1895 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6399 - loss: 1.2214 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.63 0.73 0.68 1000 1 0.77 0.77 0.77 1000 2 0.48 0.51 0.49 1000 3 0.44 0.46 0.45 1000 4 0.59 0.50 0.54 1000 5 0.54 0.54 0.54 1000 6 0.71 0.74 0.72 1000 7 0.74 0.66 0.69 1000 8 0.75 0.75 0.75 1000 9 0.76 0.70 0.73 1000 accuracy 0.64 10000 macro avg 0.64 0.64 0.64 10000 weighted avg 0.64 0.64 0.64 10000 Accuracy Score: 0.6358 Root Mean Square Error: 2.554153480118217
In [ ]:
# Model 13: Rerun Model 10 with Dropout at end
name = 'CNN_MP_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 8ms/step - accuracy: 0.4051 - loss: 1.6842 - val_accuracy: 0.5498 - val_loss: 1.2706 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 5ms/step - accuracy: 0.5753 - loss: 1.2307 - val_accuracy: 0.6042 - val_loss: 1.1416 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6211 - loss: 1.0992 - val_accuracy: 0.6018 - val_loss: 1.1324 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 3s 5ms/step - accuracy: 0.6514 - loss: 1.0137 - val_accuracy: 0.6354 - val_loss: 1.0561 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6712 - loss: 0.9575 - val_accuracy: 0.6412 - val_loss: 1.0420 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6865 - loss: 0.9069 - val_accuracy: 0.6520 - val_loss: 1.0265 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6968 - loss: 0.8734 - val_accuracy: 0.6420 - val_loss: 1.0328 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7159 - loss: 0.8252 - val_accuracy: 0.6518 - val_loss: 1.0142 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7250 - loss: 0.7973 - val_accuracy: 0.6600 - val_loss: 1.0033 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7396 - loss: 0.7576 - val_accuracy: 0.6648 - val_loss: 0.9614 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7441 - loss: 0.7353 - val_accuracy: 0.6486 - val_loss: 1.0278 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7546 - loss: 0.7124 - val_accuracy: 0.6446 - val_loss: 1.0525 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7617 - loss: 0.6910 - val_accuracy: 0.6620 - val_loss: 1.0297 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6486 - loss: 1.0603 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.72 0.62 0.67 1000 1 0.72 0.83 0.77 1000 2 0.65 0.34 0.45 1000 3 0.42 0.58 0.49 1000 4 0.55 0.67 0.61 1000 5 0.57 0.53 0.55 1000 6 0.78 0.70 0.74 1000 7 0.75 0.71 0.73 1000 8 0.74 0.78 0.76 1000 9 0.73 0.74 0.73 1000 accuracy 0.65 10000 macro avg 0.66 0.65 0.65 10000 weighted avg 0.66 0.65 0.65 10000 Accuracy Score: 0.6507 Root Mean Square Error: 2.51073694360839
In [ ]:
# Model 14: Dense Neural Network with 2 dropout layers
name = 'DNN_DO_DNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 14s 14ms/step - accuracy: 0.3318 - loss: 2.0390 - val_accuracy: 0.4506 - val_loss: 1.6138 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.4708 - loss: 1.5539 - val_accuracy: 0.4840 - val_loss: 1.5283 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.4982 - loss: 1.4734 - val_accuracy: 0.4802 - val_loss: 1.5040 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5178 - loss: 1.4194 - val_accuracy: 0.4860 - val_loss: 1.4890 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 11ms/step - accuracy: 0.5219 - loss: 1.4003 - val_accuracy: 0.4818 - val_loss: 1.5028 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5359 - loss: 1.3663 - val_accuracy: 0.4852 - val_loss: 1.4884 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5435 - loss: 1.3425 - val_accuracy: 0.4970 - val_loss: 1.4853 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5569 - loss: 1.3115 - val_accuracy: 0.4898 - val_loss: 1.4991 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5643 - loss: 1.2950 - val_accuracy: 0.4958 - val_loss: 1.4763 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5690 - loss: 1.2810 - val_accuracy: 0.4974 - val_loss: 1.4846 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5759 - loss: 1.2577 - val_accuracy: 0.4974 - val_loss: 1.4936 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5800 - loss: 1.2481 - val_accuracy: 0.4914 - val_loss: 1.5046 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5870 - loss: 1.2252 - val_accuracy: 0.4930 - val_loss: 1.5028 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.4983 - loss: 1.4879 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.53 0.59 0.56 1000 1 0.60 0.62 0.61 1000 2 0.34 0.37 0.35 1000 3 0.37 0.27 0.31 1000 4 0.39 0.48 0.43 1000 5 0.46 0.33 0.38 1000 6 0.49 0.60 0.54 1000 7 0.63 0.50 0.56 1000 8 0.61 0.64 0.62 1000 9 0.58 0.59 0.58 1000 accuracy 0.50 10000 macro avg 0.50 0.50 0.49 10000 weighted avg 0.50 0.50 0.49 10000 Accuracy Score: 0.4982 Root Mean Square Error: 3.1038685539178363
In [ ]:
# Model 15: CNN 2 layers with 2 dropout layers
name = 'CNN_DO_CNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 19ms/step - accuracy: 0.3556 - loss: 1.7880 - val_accuracy: 0.5408 - val_loss: 1.3273 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5750 - loss: 1.2089 - val_accuracy: 0.5524 - val_loss: 1.2438 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6278 - loss: 1.0700 - val_accuracy: 0.6084 - val_loss: 1.1025 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6771 - loss: 0.9417 - val_accuracy: 0.6276 - val_loss: 1.0647 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7055 - loss: 0.8514 - val_accuracy: 0.6356 - val_loss: 1.0480 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7227 - loss: 0.7953 - val_accuracy: 0.6466 - val_loss: 1.0190 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7492 - loss: 0.7161 - val_accuracy: 0.6420 - val_loss: 1.0466 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7578 - loss: 0.6810 - val_accuracy: 0.6394 - val_loss: 1.0835 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7797 - loss: 0.6242 - val_accuracy: 0.6272 - val_loss: 1.1013 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6365 - loss: 1.1112 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step Classification Report precision recall f1-score support 0 0.69 0.67 0.68 1000 1 0.79 0.78 0.78 1000 2 0.58 0.37 0.45 1000 3 0.45 0.39 0.42 1000 4 0.48 0.66 0.56 1000 5 0.58 0.52 0.55 1000 6 0.62 0.79 0.70 1000 7 0.72 0.69 0.70 1000 8 0.76 0.73 0.75 1000 9 0.70 0.76 0.73 1000 accuracy 0.64 10000 macro avg 0.64 0.64 0.63 10000 weighted avg 0.64 0.64 0.63 10000 Accuracy Score: 0.6351 Root Mean Square Error: 2.509760944791356
In [ ]:
# Model 16: Repeat Model 1 with Batch Normalization layer
name = 'DNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 8ms/step - accuracy: 0.3460 - loss: 3.9538 - val_accuracy: 0.4526 - val_loss: 1.6005 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.4562 - loss: 1.5711 - val_accuracy: 0.4258 - val_loss: 1.6523 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.4852 - loss: 1.4857 - val_accuracy: 0.4442 - val_loss: 1.6566 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5010 - loss: 1.4459 - val_accuracy: 0.3920 - val_loss: 1.9716 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.4032 - loss: 1.9097 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.66 0.39 0.49 1000 1 0.46 0.72 0.56 1000 2 0.26 0.22 0.24 1000 3 0.39 0.01 0.03 1000 4 0.23 0.71 0.35 1000 5 0.51 0.04 0.07 1000 6 0.40 0.59 0.48 1000 7 0.61 0.34 0.44 1000 8 0.61 0.54 0.57 1000 9 0.52 0.48 0.50 1000 accuracy 0.40 10000 macro avg 0.46 0.40 0.37 10000 weighted avg 0.46 0.40 0.37 10000 Accuracy Score: 0.4037 Root Mean Square Error: 3.17372021451167
In [ ]:
# Model 17: Build Model 2 with Batch Normalization between layers
name = 'DNN_BN_DNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 12ms/step - accuracy: 0.3847 - loss: 2.8307 - val_accuracy: 0.4450 - val_loss: 1.6967 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.4873 - loss: 1.5700 - val_accuracy: 0.4456 - val_loss: 1.7007 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.5192 - loss: 1.4668 - val_accuracy: 0.4614 - val_loss: 1.6424 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5477 - loss: 1.3764 - val_accuracy: 0.4344 - val_loss: 1.7468 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5798 - loss: 1.2779 - val_accuracy: 0.4578 - val_loss: 1.7089 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 9ms/step - accuracy: 0.6158 - loss: 1.1737 - val_accuracy: 0.4360 - val_loss: 1.7442 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.4435 - loss: 1.7083 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.57 0.39 0.47 1000 1 0.60 0.51 0.55 1000 2 0.27 0.43 0.33 1000 3 0.30 0.34 0.32 1000 4 0.37 0.28 0.32 1000 5 0.33 0.37 0.35 1000 6 0.47 0.44 0.45 1000 7 0.47 0.55 0.50 1000 8 0.62 0.53 0.57 1000 9 0.55 0.52 0.53 1000 accuracy 0.44 10000 macro avg 0.46 0.44 0.44 10000 weighted avg 0.46 0.44 0.44 10000 Accuracy Score: 0.4352 Root Mean Square Error: 3.170709699736007
In [ ]:
# Model 18: Build Model 3 with Batch Normalization after Conv2D
name = 'CNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 9ms/step - accuracy: 0.4195 - loss: 2.0144 - val_accuracy: 0.3636 - val_loss: 1.8323 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5830 - loss: 1.2044 - val_accuracy: 0.5398 - val_loss: 1.3642 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6594 - loss: 0.9910 - val_accuracy: 0.5210 - val_loss: 1.5085 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7129 - loss: 0.8301 - val_accuracy: 0.4996 - val_loss: 1.6845 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7583 - loss: 0.6979 - val_accuracy: 0.4226 - val_loss: 2.6495 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.4275 - loss: 2.6161 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.72 0.21 0.33 1000 1 0.66 0.59 0.62 1000 2 0.58 0.06 0.12 1000 3 0.60 0.05 0.09 1000 4 0.32 0.55 0.41 1000 5 0.64 0.11 0.18 1000 6 0.28 0.92 0.43 1000 7 0.65 0.45 0.53 1000 8 0.62 0.59 0.61 1000 9 0.41 0.77 0.54 1000 accuracy 0.43 10000 macro avg 0.55 0.43 0.39 10000 weighted avg 0.55 0.43 0.39 10000 Accuracy Score: 0.4307 Root Mean Square Error: 3.316986584235758
In [ ]:
# Model 19: Build Model 4 with Batch Normalization between layers
name = 'CNN_BN_CNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 14s 17ms/step - accuracy: 0.4121 - loss: 2.2139 - val_accuracy: 0.5176 - val_loss: 1.3713 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.6913 - loss: 0.9043 - val_accuracy: 0.6236 - val_loss: 1.1107 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.7909 - loss: 0.6170 - val_accuracy: 0.5878 - val_loss: 1.2745 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.8847 - loss: 0.3436 - val_accuracy: 0.6090 - val_loss: 1.4688 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.9371 - loss: 0.1859 - val_accuracy: 0.5662 - val_loss: 2.3577 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.5762 - loss: 2.3786 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step Classification Report precision recall f1-score support 0 0.71 0.38 0.49 1000 1 0.86 0.51 0.64 1000 2 0.42 0.41 0.42 1000 3 0.42 0.45 0.43 1000 4 0.54 0.61 0.57 1000 5 0.55 0.52 0.54 1000 6 0.74 0.69 0.71 1000 7 0.61 0.68 0.64 1000 8 0.78 0.55 0.64 1000 9 0.43 0.87 0.57 1000 accuracy 0.56 10000 macro avg 0.61 0.56 0.57 10000 weighted avg 0.61 0.56 0.57 10000 Accuracy Score: 0.5645 Root Mean Square Error: 2.9678106408596894
In [ ]:
# Model 20: Build Model 5 with BatchNormalization between layers
name = 'CNN_BN_MP'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 10ms/step - accuracy: 0.4685 - loss: 1.5865 - val_accuracy: 0.5294 - val_loss: 1.3460 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 5ms/step - accuracy: 0.6116 - loss: 1.1211 - val_accuracy: 0.5160 - val_loss: 1.4697 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6535 - loss: 0.9976 - val_accuracy: 0.4434 - val_loss: 1.9761 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6899 - loss: 0.8928 - val_accuracy: 0.5676 - val_loss: 1.3926 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7247 - loss: 0.7971 - val_accuracy: 0.5952 - val_loss: 1.2737 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7469 - loss: 0.7310 - val_accuracy: 0.5400 - val_loss: 1.6234 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.7677 - loss: 0.6687 - val_accuracy: 0.6050 - val_loss: 1.3878 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7932 - loss: 0.5976 - val_accuracy: 0.5850 - val_loss: 1.5890 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.8100 - loss: 0.5488 - val_accuracy: 0.5658 - val_loss: 1.6563 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.8208 - loss: 0.5110 - val_accuracy: 0.4468 - val_loss: 2.8566 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.4398 - loss: 2.8983 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.64 0.24 0.35 1000 1 0.78 0.49 0.60 1000 2 0.53 0.15 0.23 1000 3 0.49 0.17 0.25 1000 4 0.40 0.55 0.46 1000 5 0.55 0.12 0.20 1000 6 0.89 0.25 0.39 1000 7 0.33 0.85 0.48 1000 8 0.34 0.92 0.49 1000 9 0.54 0.67 0.59 1000 accuracy 0.44 10000 macro avg 0.55 0.44 0.41 10000 weighted avg 0.55 0.44 0.41 10000 Accuracy Score: 0.44 Root Mean Square Error: 3.5488448824934573
In [ ]:
data_to_send = pd.DataFrame(data)
data_to_send
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
In [ ]:
# Model 21: Repeat Model 7 with BatchNormalization after second layer
name = 'DNN_DNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 19s 23ms/step - accuracy: 0.3726 - loss: 2.2762 - val_accuracy: 0.4148 - val_loss: 1.6842 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 11ms/step - accuracy: 0.4510 - loss: 1.6108 - val_accuracy: 0.4220 - val_loss: 1.6763 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 9ms/step - accuracy: 0.4745 - loss: 1.5393 - val_accuracy: 0.4540 - val_loss: 1.5777 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 9ms/step - accuracy: 0.4832 - loss: 1.5068 - val_accuracy: 0.4126 - val_loss: 1.7088 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.4949 - loss: 1.4675 - val_accuracy: 0.4534 - val_loss: 1.6133 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.5053 - loss: 1.4416 - val_accuracy: 0.4234 - val_loss: 1.6401 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.4218 - loss: 1.6176 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.56 0.37 0.44 1000 1 0.45 0.66 0.53 1000 2 0.32 0.12 0.17 1000 3 0.31 0.18 0.23 1000 4 0.28 0.60 0.38 1000 5 0.42 0.28 0.33 1000 6 0.48 0.47 0.48 1000 7 0.65 0.43 0.52 1000 8 0.40 0.75 0.52 1000 9 0.59 0.37 0.46 1000 accuracy 0.42 10000 macro avg 0.45 0.42 0.41 10000 weighted avg 0.45 0.42 0.41 10000 Accuracy Score: 0.4228 Root Mean Square Error: 3.3392364396670087
In [ ]:
# Model 22: Repeat Model 9 with Dropout after layers
name = 'CNN_CNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 19ms/step - accuracy: 0.4030 - loss: 1.7821 - val_accuracy: 0.4110 - val_loss: 1.6443 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 14ms/step - accuracy: 0.5834 - loss: 1.1844 - val_accuracy: 0.5408 - val_loss: 1.3653 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.6672 - loss: 0.9593 - val_accuracy: 0.6048 - val_loss: 1.1712 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 14ms/step - accuracy: 0.7326 - loss: 0.7712 - val_accuracy: 0.5740 - val_loss: 1.3309 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.7804 - loss: 0.6384 - val_accuracy: 0.5994 - val_loss: 1.2877 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 14ms/step - accuracy: 0.8244 - loss: 0.5129 - val_accuracy: 0.5624 - val_loss: 1.5650 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 3s 7ms/step - accuracy: 0.5619 - loss: 1.5707 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.64 0.48 0.55 1000 1 0.70 0.68 0.69 1000 2 0.43 0.52 0.47 1000 3 0.39 0.37 0.38 1000 4 0.54 0.41 0.47 1000 5 0.45 0.46 0.46 1000 6 0.72 0.63 0.67 1000 7 0.76 0.52 0.62 1000 8 0.49 0.85 0.63 1000 9 0.64 0.66 0.65 1000 accuracy 0.56 10000 macro avg 0.58 0.56 0.56 10000 weighted avg 0.58 0.56 0.56 10000 Accuracy Score: 0.5589 Root Mean Square Error: 2.9188525142596706
In [ ]:
# Model 23: Rerun Model 10 with BatchNormalization at end
name = 'CNN_MP_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 10ms/step - accuracy: 0.4806 - loss: 1.5841 - val_accuracy: 0.5206 - val_loss: 1.3561 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 5ms/step - accuracy: 0.6283 - loss: 1.0771 - val_accuracy: 0.4900 - val_loss: 1.5948 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6714 - loss: 0.9517 - val_accuracy: 0.5452 - val_loss: 1.3666 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.6966 - loss: 0.8778 - val_accuracy: 0.6238 - val_loss: 1.1495 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7186 - loss: 0.8086 - val_accuracy: 0.5838 - val_loss: 1.3307 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7440 - loss: 0.7327 - val_accuracy: 0.5700 - val_loss: 1.5170 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - accuracy: 0.7759 - loss: 0.6553 - val_accuracy: 0.5974 - val_loss: 1.3434 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.5897 - loss: 1.3679 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.56 0.76 0.64 1000 1 0.76 0.74 0.75 1000 2 0.45 0.46 0.45 1000 3 0.39 0.50 0.44 1000 4 0.54 0.46 0.50 1000 5 0.50 0.46 0.48 1000 6 0.55 0.83 0.66 1000 7 0.82 0.49 0.62 1000 8 0.74 0.68 0.71 1000 9 0.83 0.55 0.66 1000 accuracy 0.59 10000 macro avg 0.62 0.59 0.59 10000 weighted avg 0.62 0.59 0.59 10000 Accuracy Score: 0.5901 Root Mean Square Error: 2.7073972741361767
In [ ]:
# Model 24: Dense Neural Network with 2 BatchNormalization layers
name = 'DNN_BN_DNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 13ms/step - accuracy: 0.3178 - loss: 12.9698 - val_accuracy: 0.3548 - val_loss: 2.3091 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.4456 - loss: 1.8536 - val_accuracy: 0.4338 - val_loss: 1.8169 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.4984 - loss: 1.5713 - val_accuracy: 0.4602 - val_loss: 1.6753 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5071 - loss: 1.5283 - val_accuracy: 0.4692 - val_loss: 1.6645 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5220 - loss: 1.4721 - val_accuracy: 0.4456 - val_loss: 1.6963 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5211 - loss: 1.4653 - val_accuracy: 0.4404 - val_loss: 1.7063 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 10ms/step - accuracy: 0.5315 - loss: 1.4196 - val_accuracy: 0.4608 - val_loss: 1.5918 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.4713 - loss: 1.5585 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.47 0.57 0.52 1000 1 0.52 0.65 0.58 1000 2 0.35 0.26 0.30 1000 3 0.35 0.35 0.35 1000 4 0.40 0.40 0.40 1000 5 0.42 0.29 0.34 1000 6 0.48 0.54 0.51 1000 7 0.56 0.50 0.53 1000 8 0.57 0.60 0.59 1000 9 0.52 0.53 0.52 1000 accuracy 0.47 10000 macro avg 0.46 0.47 0.46 10000 weighted avg 0.46 0.47 0.46 10000 Accuracy Score: 0.4698 Root Mean Square Error: 3.2659148794786432
In [ ]:
# Model 25: CNN 2 layers with 2 BatchNormalization layers
name = 'CNN_BN_CNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 18ms/step - accuracy: 0.3755 - loss: 5.3699 - val_accuracy: 0.4360 - val_loss: 1.6429 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5704 - loss: 1.2296 - val_accuracy: 0.4548 - val_loss: 1.6437 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6242 - loss: 1.0794 - val_accuracy: 0.5278 - val_loss: 1.3497 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6577 - loss: 0.9888 - val_accuracy: 0.5260 - val_loss: 1.5454 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6816 - loss: 0.9195 - val_accuracy: 0.5652 - val_loss: 1.3240 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 15ms/step - accuracy: 0.7091 - loss: 0.8375 - val_accuracy: 0.5694 - val_loss: 1.3706 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 15ms/step - accuracy: 0.7355 - loss: 0.7636 - val_accuracy: 0.5710 - val_loss: 1.4136 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7700 - loss: 0.6638 - val_accuracy: 0.5456 - val_loss: 1.5248 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7984 - loss: 0.5922 - val_accuracy: 0.5312 - val_loss: 1.7024 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8192 - loss: 0.5208 - val_accuracy: 0.5412 - val_loss: 1.8428 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.5516 - loss: 1.8169 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.66 0.47 0.55 1000 1 0.72 0.64 0.68 1000 2 0.44 0.36 0.40 1000 3 0.34 0.43 0.38 1000 4 0.53 0.52 0.53 1000 5 0.40 0.55 0.46 1000 6 0.57 0.71 0.64 1000 7 0.64 0.57 0.60 1000 8 0.70 0.63 0.66 1000 9 0.68 0.62 0.65 1000 accuracy 0.55 10000 macro avg 0.57 0.55 0.55 10000 weighted avg 0.57 0.55 0.55 10000 Accuracy Score: 0.5511 Root Mean Square Error: 2.7692417734824093
In [ ]:
# Model 26: Dense Neural Network with Dropout and BatchNormalization layers
name = 'DNN_DO_DNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 23s 19ms/step - accuracy: 0.3420 - loss: 2.2513 - val_accuracy: 0.4258 - val_loss: 1.6862 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 10ms/step - accuracy: 0.4606 - loss: 1.5893 - val_accuracy: 0.4414 - val_loss: 1.6170 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.4894 - loss: 1.4887 - val_accuracy: 0.4584 - val_loss: 1.5688 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5001 - loss: 1.4418 - val_accuracy: 0.4598 - val_loss: 1.5500 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5202 - loss: 1.3903 - val_accuracy: 0.4676 - val_loss: 1.5402 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5324 - loss: 1.3540 - val_accuracy: 0.4704 - val_loss: 1.5399 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5382 - loss: 1.3272 - val_accuracy: 0.4634 - val_loss: 1.5649 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5518 - loss: 1.3028 - val_accuracy: 0.4564 - val_loss: 1.5683 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 11ms/step - accuracy: 0.5652 - loss: 1.2652 - val_accuracy: 0.4568 - val_loss: 1.6074 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.4564 - loss: 1.5850 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.51 0.53 0.52 1000 1 0.60 0.54 0.57 1000 2 0.37 0.35 0.36 1000 3 0.29 0.35 0.31 1000 4 0.38 0.22 0.28 1000 5 0.41 0.39 0.40 1000 6 0.48 0.55 0.51 1000 7 0.47 0.58 0.52 1000 8 0.68 0.46 0.55 1000 9 0.49 0.64 0.55 1000 accuracy 0.46 10000 macro avg 0.47 0.46 0.46 10000 weighted avg 0.47 0.46 0.46 10000 Accuracy Score: 0.4594 Root Mean Square Error: 3.214125075350989
In [ ]:
# Model 27: Dense Neural Network with Dropout and BatchNormalization layers switched from model 26
name = 'DNN_BN_DNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, input_shape=((32,32,3,)), activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.BatchNormalization())
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 14s 14ms/step - accuracy: 0.3852 - loss: 2.6202 - val_accuracy: 0.4098 - val_loss: 1.7774 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 10ms/step - accuracy: 0.4831 - loss: 1.5621 - val_accuracy: 0.4266 - val_loss: 1.6861 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.5115 - loss: 1.4791 - val_accuracy: 0.4770 - val_loss: 1.5914 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5391 - loss: 1.3854 - val_accuracy: 0.4682 - val_loss: 1.6094 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5664 - loss: 1.3090 - val_accuracy: 0.4676 - val_loss: 1.6400 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.5928 - loss: 1.2285 - val_accuracy: 0.4648 - val_loss: 1.7025 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.4677 - loss: 1.6527 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.54 0.52 0.53 1000 1 0.54 0.61 0.58 1000 2 0.39 0.25 0.30 1000 3 0.33 0.33 0.33 1000 4 0.38 0.45 0.41 1000 5 0.37 0.39 0.38 1000 6 0.51 0.47 0.49 1000 7 0.52 0.53 0.53 1000 8 0.61 0.56 0.58 1000 9 0.53 0.60 0.56 1000 accuracy 0.47 10000 macro avg 0.47 0.47 0.47 10000 weighted avg 0.47 0.47 0.47 10000 Accuracy Score: 0.4709 Root Mean Square Error: 3.1688957067092
In [ ]:
# Model 28: CNN 2 layers with 2 dropout layers
name = 'CNN_BN_CNN_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 18ms/step - accuracy: 0.4473 - loss: 2.0275 - val_accuracy: 0.5872 - val_loss: 1.1861 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6879 - loss: 0.9126 - val_accuracy: 0.5508 - val_loss: 1.3799 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7786 - loss: 0.6322 - val_accuracy: 0.6430 - val_loss: 1.2815 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8581 - loss: 0.4098 - val_accuracy: 0.6118 - val_loss: 1.4337 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.8936 - loss: 0.3048 - val_accuracy: 0.6334 - val_loss: 1.3715 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.9208 - loss: 0.2264 - val_accuracy: 0.6152 - val_loss: 1.7616 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6248 - loss: 1.7919 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.71 0.60 0.65 1000 1 0.70 0.79 0.74 1000 2 0.55 0.42 0.48 1000 3 0.41 0.47 0.44 1000 4 0.63 0.49 0.55 1000 5 0.50 0.62 0.55 1000 6 0.75 0.68 0.71 1000 7 0.57 0.76 0.65 1000 8 0.83 0.59 0.69 1000 9 0.66 0.77 0.71 1000 accuracy 0.62 10000 macro avg 0.63 0.62 0.62 10000 weighted avg 0.63 0.62 0.62 10000 Accuracy Score: 0.619 Root Mean Square Error: 2.636873148257231
In [ ]:
# Model 29: CNN 2 layers with 2 dropout layers
name = 'CNN_DO_CNN_BN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 18ms/step - accuracy: 0.4141 - loss: 1.7111 - val_accuracy: 0.4732 - val_loss: 1.4786 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5714 - loss: 1.2273 - val_accuracy: 0.4878 - val_loss: 1.4466 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6308 - loss: 1.0453 - val_accuracy: 0.6006 - val_loss: 1.1613 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6884 - loss: 0.8876 - val_accuracy: 0.5928 - val_loss: 1.1940 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7411 - loss: 0.7471 - val_accuracy: 0.5950 - val_loss: 1.2556 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7754 - loss: 0.6326 - val_accuracy: 0.5322 - val_loss: 1.5357 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 6ms/step - accuracy: 0.5518 - loss: 1.4983 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.66 0.58 0.62 1000 1 0.68 0.76 0.72 1000 2 0.43 0.46 0.45 1000 3 0.32 0.44 0.37 1000 4 0.61 0.33 0.42 1000 5 0.33 0.72 0.45 1000 6 0.71 0.58 0.64 1000 7 0.65 0.57 0.61 1000 8 0.82 0.60 0.70 1000 9 0.88 0.38 0.53 1000 accuracy 0.54 10000 macro avg 0.61 0.54 0.55 10000 weighted avg 0.61 0.54 0.55 10000 Accuracy Score: 0.5417 Root Mean Square Error: 2.7038491082159153
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
In [ ]:
# Model 30: CNN | Dropout | DNN
name = 'CNN_DO_DNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(units=128, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 19ms/step - accuracy: 0.3750 - loss: 1.8488 - val_accuracy: 0.5448 - val_loss: 1.3224 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.5827 - loss: 1.2376 - val_accuracy: 0.5768 - val_loss: 1.2288 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.6335 - loss: 1.0870 - val_accuracy: 0.5940 - val_loss: 1.1810 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.6741 - loss: 0.9739 - val_accuracy: 0.6004 - val_loss: 1.1589 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.7172 - loss: 0.8478 - val_accuracy: 0.6060 - val_loss: 1.1815 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.7552 - loss: 0.7544 - val_accuracy: 0.6104 - val_loss: 1.1796 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.7843 - loss: 0.6709 - val_accuracy: 0.6132 - val_loss: 1.2379 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.8102 - loss: 0.5989 - val_accuracy: 0.6122 - val_loss: 1.2584 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - accuracy: 0.8418 - loss: 0.5162 - val_accuracy: 0.6170 - val_loss: 1.3183 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.8662 - loss: 0.4555 - val_accuracy: 0.6016 - val_loss: 1.4114 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.8852 - loss: 0.4014 - val_accuracy: 0.5900 - val_loss: 1.4980 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.9002 - loss: 0.3576 - val_accuracy: 0.5954 - val_loss: 1.5696 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.5959 - loss: 1.6061 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.59 0.66 0.62 1000 1 0.74 0.74 0.74 1000 2 0.51 0.35 0.41 1000 3 0.42 0.39 0.40 1000 4 0.57 0.41 0.48 1000 5 0.48 0.51 0.50 1000 6 0.66 0.68 0.67 1000 7 0.55 0.73 0.63 1000 8 0.70 0.74 0.72 1000 9 0.67 0.71 0.69 1000 accuracy 0.59 10000 macro avg 0.59 0.59 0.59 10000 weighted avg 0.59 0.59 0.59 10000 Accuracy Score: 0.5923 Root Mean Square Error: 2.7498909069270367
In [ ]:
# Model 31: DNN | Dropout | CNN
name = 'DNN_DO_CNN'
k.clear_session()
model = models.Sequential()
model.add(layers.Dense(units=128, activation=tf.nn.relu, input_shape=((32,32,3,)), kernel_regularizer=tf.keras.regularizers.L2(0.001)))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 19ms/step - accuracy: 0.3407 - loss: 1.8276 - val_accuracy: 0.4862 - val_loss: 1.4570 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5268 - loss: 1.3507 - val_accuracy: 0.5310 - val_loss: 1.3320 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.5713 - loss: 1.2191 - val_accuracy: 0.5528 - val_loss: 1.2749 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6004 - loss: 1.1560 - val_accuracy: 0.5424 - val_loss: 1.3200 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6171 - loss: 1.1028 - val_accuracy: 0.5470 - val_loss: 1.2899 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.6243 - loss: 1.0808 - val_accuracy: 0.5642 - val_loss: 1.2444 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.6448 - loss: 1.0225 - val_accuracy: 0.5732 - val_loss: 1.2334 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6701 - loss: 0.9531 - val_accuracy: 0.5658 - val_loss: 1.2632 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.6877 - loss: 0.9025 - val_accuracy: 0.5840 - val_loss: 1.2383 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.7006 - loss: 0.8672 - val_accuracy: 0.5692 - val_loss: 1.2791 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 13ms/step - accuracy: 0.7143 - loss: 0.8185 - val_accuracy: 0.5734 - val_loss: 1.2809 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 13ms/step - accuracy: 0.7312 - loss: 0.7843 - val_accuracy: 0.5770 - val_loss: 1.2906 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.5761 - loss: 1.2778 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step Classification Report precision recall f1-score support 0 0.58 0.69 0.63 1000 1 0.76 0.67 0.71 1000 2 0.44 0.41 0.42 1000 3 0.38 0.48 0.42 1000 4 0.56 0.40 0.47 1000 5 0.52 0.41 0.46 1000 6 0.69 0.69 0.69 1000 7 0.55 0.72 0.62 1000 8 0.71 0.63 0.67 1000 9 0.66 0.66 0.66 1000 accuracy 0.58 10000 macro avg 0.58 0.58 0.58 10000 weighted avg 0.58 0.58 0.58 10000 Accuracy Score: 0.577 Root Mean Square Error: 2.8256503676145073
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
In [ ]:
# Build Model 32: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.4058 - loss: 1.6811 - val_accuracy: 0.5510 - val_loss: 1.2954 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5701 - loss: 1.2325 - val_accuracy: 0.5630 - val_loss: 1.2612 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6215 - loss: 1.0981 - val_accuracy: 0.6172 - val_loss: 1.1183 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6535 - loss: 1.0140 - val_accuracy: 0.6472 - val_loss: 1.0535 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6728 - loss: 0.9446 - val_accuracy: 0.6188 - val_loss: 1.0858 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6903 - loss: 0.9077 - val_accuracy: 0.6498 - val_loss: 1.0213 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7011 - loss: 0.8737 - val_accuracy: 0.6562 - val_loss: 1.0101 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7131 - loss: 0.8306 - val_accuracy: 0.6554 - val_loss: 1.0039 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7216 - loss: 0.8075 - val_accuracy: 0.6576 - val_loss: 0.9964 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7328 - loss: 0.7749 - val_accuracy: 0.6528 - val_loss: 1.0008 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7400 - loss: 0.7441 - val_accuracy: 0.6304 - val_loss: 1.0780 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7460 - loss: 0.7383 - val_accuracy: 0.6666 - val_loss: 0.9787 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7526 - loss: 0.7162 - val_accuracy: 0.6280 - val_loss: 1.0606 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7529 - loss: 0.7070 - val_accuracy: 0.6584 - val_loss: 1.0048 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7607 - loss: 0.6823 - val_accuracy: 0.6590 - val_loss: 1.0002 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6579 - loss: 1.0082 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.68 0.70 0.69 1000 1 0.82 0.73 0.77 1000 2 0.60 0.39 0.47 1000 3 0.48 0.45 0.46 1000 4 0.59 0.64 0.61 1000 5 0.56 0.59 0.58 1000 6 0.74 0.75 0.75 1000 7 0.69 0.75 0.72 1000 8 0.77 0.75 0.76 1000 9 0.64 0.83 0.73 1000 accuracy 0.66 10000 macro avg 0.66 0.66 0.65 10000 weighted avg 0.66 0.66 0.65 10000 Accuracy Score: 0.6578 Root Mean Square Error: 2.5354092371844037
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.5'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.5))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 14ms/step - accuracy: 0.3922 - loss: 1.7057 - val_accuracy: 0.5392 - val_loss: 1.4025 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5452 - loss: 1.2936 - val_accuracy: 0.5678 - val_loss: 1.3238 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.5743 - loss: 1.2215 - val_accuracy: 0.5828 - val_loss: 1.3023 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5921 - loss: 1.1749 - val_accuracy: 0.5980 - val_loss: 1.2392 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6076 - loss: 1.1256 - val_accuracy: 0.6234 - val_loss: 1.1671 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6222 - loss: 1.0914 - val_accuracy: 0.6234 - val_loss: 1.1641 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6298 - loss: 1.0566 - val_accuracy: 0.6288 - val_loss: 1.1154 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6398 - loss: 1.0313 - val_accuracy: 0.6446 - val_loss: 1.0916 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6509 - loss: 1.0032 - val_accuracy: 0.6432 - val_loss: 1.0945 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6560 - loss: 0.9946 - val_accuracy: 0.6496 - val_loss: 1.0766 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6624 - loss: 0.9789 - val_accuracy: 0.6440 - val_loss: 1.0620 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6690 - loss: 0.9495 - val_accuracy: 0.6602 - val_loss: 1.0415 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6747 - loss: 0.9320 - val_accuracy: 0.6568 - val_loss: 1.0469 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6810 - loss: 0.9218 - val_accuracy: 0.6220 - val_loss: 1.0941 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6803 - loss: 0.9170 - val_accuracy: 0.6528 - val_loss: 1.0555 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6505 - loss: 1.0627 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step Classification Report precision recall f1-score support 0 0.62 0.72 0.66 1000 1 0.71 0.85 0.77 1000 2 0.51 0.51 0.51 1000 3 0.53 0.43 0.47 1000 4 0.56 0.60 0.58 1000 5 0.58 0.53 0.56 1000 6 0.66 0.82 0.73 1000 7 0.75 0.69 0.72 1000 8 0.77 0.73 0.75 1000 9 0.83 0.62 0.71 1000 accuracy 0.65 10000 macro avg 0.65 0.65 0.65 10000 weighted avg 0.65 0.65 0.65 10000 Accuracy Score: 0.6489 Root Mean Square Error: 2.564254277562972
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
| 32 | CNN_DO_MP_DO_0.5 | 0.677 | 0.653 | 0.649 | 0.932 | 1.056 | 1.071 | 83.411 |
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.5-1'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.5))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.4122 - loss: 1.6728 - val_accuracy: 0.5336 - val_loss: 1.4118 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5703 - loss: 1.2424 - val_accuracy: 0.5452 - val_loss: 1.3270 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6047 - loss: 1.1428 - val_accuracy: 0.6104 - val_loss: 1.1941 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6365 - loss: 1.0601 - val_accuracy: 0.6282 - val_loss: 1.1420 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6499 - loss: 1.0085 - val_accuracy: 0.6220 - val_loss: 1.1392 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6693 - loss: 0.9606 - val_accuracy: 0.6410 - val_loss: 1.0918 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6775 - loss: 0.9331 - val_accuracy: 0.6114 - val_loss: 1.1304 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6886 - loss: 0.9000 - val_accuracy: 0.6568 - val_loss: 1.0462 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6967 - loss: 0.8668 - val_accuracy: 0.6422 - val_loss: 1.0609 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7060 - loss: 0.8502 - val_accuracy: 0.6504 - val_loss: 1.0337 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7073 - loss: 0.8426 - val_accuracy: 0.6570 - val_loss: 1.0196 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7180 - loss: 0.8130 - val_accuracy: 0.6438 - val_loss: 1.0501 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7277 - loss: 0.7918 - val_accuracy: 0.6584 - val_loss: 1.0151 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7295 - loss: 0.7810 - val_accuracy: 0.6476 - val_loss: 1.0343 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7376 - loss: 0.7575 - val_accuracy: 0.6528 - val_loss: 1.0180 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7355 - loss: 0.7512 - val_accuracy: 0.6636 - val_loss: 0.9898 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7394 - loss: 0.7445 - val_accuracy: 0.6522 - val_loss: 1.0193 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7481 - loss: 0.7255 - val_accuracy: 0.6506 - val_loss: 1.0151 Epoch 19/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7514 - loss: 0.7171 - val_accuracy: 0.6494 - val_loss: 1.0388 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6457 - loss: 1.0541 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.61 0.72 0.66 1000 1 0.70 0.83 0.76 1000 2 0.54 0.42 0.47 1000 3 0.54 0.36 0.43 1000 4 0.59 0.58 0.58 1000 5 0.66 0.43 0.52 1000 6 0.58 0.87 0.69 1000 7 0.74 0.69 0.72 1000 8 0.70 0.79 0.74 1000 9 0.74 0.72 0.73 1000 accuracy 0.64 10000 macro avg 0.64 0.64 0.63 10000 weighted avg 0.64 0.64 0.63 10000 Accuracy Score: 0.6412 Root Mean Square Error: 2.6135607894212067
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
| 32 | CNN_DO_MP_DO_0.5 | 0.677 | 0.653 | 0.649 | 0.932 | 1.056 | 1.071 | 83.411 |
| 33 | CNN_DO_MP_DO_0.5-1 | 0.743 | 0.649 | 0.641 | 0.735 | 1.039 | 1.065 | 96.591 |
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.5-2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.3993 - loss: 1.6994 - val_accuracy: 0.5558 - val_loss: 1.3071 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5593 - loss: 1.2599 - val_accuracy: 0.5828 - val_loss: 1.2304 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5934 - loss: 1.1739 - val_accuracy: 0.6062 - val_loss: 1.1518 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6141 - loss: 1.1042 - val_accuracy: 0.5996 - val_loss: 1.1868 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6337 - loss: 1.0620 - val_accuracy: 0.6224 - val_loss: 1.0993 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6507 - loss: 1.0121 - val_accuracy: 0.6148 - val_loss: 1.1221 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6590 - loss: 0.9864 - val_accuracy: 0.6378 - val_loss: 1.0614 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6697 - loss: 0.9528 - val_accuracy: 0.6486 - val_loss: 1.0370 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6749 - loss: 0.9451 - val_accuracy: 0.6482 - val_loss: 1.0411 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6859 - loss: 0.9122 - val_accuracy: 0.6532 - val_loss: 1.0299 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6871 - loss: 0.8962 - val_accuracy: 0.6284 - val_loss: 1.0697 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6946 - loss: 0.8757 - val_accuracy: 0.6588 - val_loss: 1.0067 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6982 - loss: 0.8708 - val_accuracy: 0.6560 - val_loss: 1.0089 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7020 - loss: 0.8515 - val_accuracy: 0.6560 - val_loss: 1.0080 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7027 - loss: 0.8498 - val_accuracy: 0.6630 - val_loss: 1.0025 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7115 - loss: 0.8301 - val_accuracy: 0.6574 - val_loss: 1.0016 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7153 - loss: 0.8173 - val_accuracy: 0.6560 - val_loss: 1.0063 Epoch 18/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7184 - loss: 0.8114 - val_accuracy: 0.6568 - val_loss: 1.0056 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6514 - loss: 1.0273 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.72 0.65 0.68 1000 1 0.83 0.74 0.78 1000 2 0.60 0.33 0.43 1000 3 0.52 0.43 0.47 1000 4 0.44 0.79 0.56 1000 5 0.65 0.44 0.52 1000 6 0.69 0.80 0.74 1000 7 0.70 0.75 0.72 1000 8 0.74 0.79 0.76 1000 9 0.73 0.77 0.75 1000 accuracy 0.65 10000 macro avg 0.66 0.65 0.64 10000 weighted avg 0.66 0.65 0.64 10000 Accuracy Score: 0.6482 Root Mean Square Error: 2.4591665254715878
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
| 32 | CNN_DO_MP_DO_0.5 | 0.677 | 0.653 | 0.649 | 0.932 | 1.056 | 1.071 | 83.411 |
| 33 | CNN_DO_MP_DO_0.5-1 | 0.743 | 0.649 | 0.641 | 0.735 | 1.039 | 1.065 | 96.591 |
| 34 | CNN_DO_MP_DO_0.5-2 | 0.714 | 0.657 | 0.648 | 0.824 | 1.006 | 1.036 | 85.457 |
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.1'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.1))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.1))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 13ms/step - accuracy: 0.4014 - loss: 1.6821 - val_accuracy: 0.5400 - val_loss: 1.3098 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 13s 8ms/step - accuracy: 0.5846 - loss: 1.1973 - val_accuracy: 0.5738 - val_loss: 1.2136 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6255 - loss: 1.0857 - val_accuracy: 0.6086 - val_loss: 1.1060 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6592 - loss: 0.9825 - val_accuracy: 0.6272 - val_loss: 1.0690 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6814 - loss: 0.9220 - val_accuracy: 0.6400 - val_loss: 1.0466 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7027 - loss: 0.8656 - val_accuracy: 0.6254 - val_loss: 1.0873 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7198 - loss: 0.8108 - val_accuracy: 0.6564 - val_loss: 1.0000 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7389 - loss: 0.7689 - val_accuracy: 0.6526 - val_loss: 1.0059 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7495 - loss: 0.7344 - val_accuracy: 0.6412 - val_loss: 1.0539 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7643 - loss: 0.6918 - val_accuracy: 0.6462 - val_loss: 1.0190 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6403 - loss: 1.0436 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.67 0.68 0.67 1000 1 0.64 0.89 0.74 1000 2 0.56 0.40 0.47 1000 3 0.45 0.52 0.49 1000 4 0.61 0.54 0.58 1000 5 0.62 0.48 0.54 1000 6 0.62 0.83 0.71 1000 7 0.74 0.75 0.74 1000 8 0.75 0.74 0.74 1000 9 0.82 0.58 0.68 1000 accuracy 0.64 10000 macro avg 0.65 0.64 0.64 10000 weighted avg 0.65 0.64 0.64 10000 Accuracy Score: 0.6413 Root Mean Square Error: 2.637783160155512
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.1-1'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.1))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 9ms/step - accuracy: 0.4069 - loss: 1.6825 - val_accuracy: 0.5344 - val_loss: 1.3063 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5700 - loss: 1.2356 - val_accuracy: 0.5654 - val_loss: 1.2481 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6193 - loss: 1.0983 - val_accuracy: 0.5958 - val_loss: 1.1609 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6472 - loss: 1.0141 - val_accuracy: 0.6242 - val_loss: 1.0707 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6725 - loss: 0.9532 - val_accuracy: 0.6368 - val_loss: 1.0479 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6931 - loss: 0.8978 - val_accuracy: 0.6420 - val_loss: 1.0399 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7041 - loss: 0.8683 - val_accuracy: 0.6508 - val_loss: 0.9925 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7137 - loss: 0.8381 - val_accuracy: 0.6378 - val_loss: 1.0454 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7262 - loss: 0.8029 - val_accuracy: 0.6244 - val_loss: 1.0706 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7276 - loss: 0.7842 - val_accuracy: 0.6594 - val_loss: 0.9886 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7339 - loss: 0.7692 - val_accuracy: 0.6154 - val_loss: 1.1130 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7447 - loss: 0.7395 - val_accuracy: 0.6524 - val_loss: 1.0117 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7553 - loss: 0.7093 - val_accuracy: 0.6572 - val_loss: 1.0009 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6501 - loss: 1.0148 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.63 0.74 0.68 1000 1 0.80 0.75 0.77 1000 2 0.54 0.43 0.48 1000 3 0.46 0.52 0.49 1000 4 0.61 0.52 0.56 1000 5 0.61 0.47 0.53 1000 6 0.61 0.84 0.71 1000 7 0.73 0.71 0.72 1000 8 0.77 0.75 0.76 1000 9 0.72 0.75 0.74 1000 accuracy 0.65 10000 macro avg 0.65 0.65 0.64 10000 weighted avg 0.65 0.65 0.64 10000 Accuracy Score: 0.6465 Root Mean Square Error: 2.540551121312067
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.1-2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.1))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.4008 - loss: 1.6988 - val_accuracy: 0.5338 - val_loss: 1.3415 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5708 - loss: 1.2401 - val_accuracy: 0.5800 - val_loss: 1.2133 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6176 - loss: 1.1090 - val_accuracy: 0.6040 - val_loss: 1.1597 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6443 - loss: 1.0258 - val_accuracy: 0.6218 - val_loss: 1.1039 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.6712 - loss: 0.9504 - val_accuracy: 0.6320 - val_loss: 1.0737 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6871 - loss: 0.9059 - val_accuracy: 0.6460 - val_loss: 1.0552 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7108 - loss: 0.8469 - val_accuracy: 0.6464 - val_loss: 1.0309 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7286 - loss: 0.8011 - val_accuracy: 0.6562 - val_loss: 0.9909 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7353 - loss: 0.7697 - val_accuracy: 0.6372 - val_loss: 1.0641 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7462 - loss: 0.7378 - val_accuracy: 0.6364 - val_loss: 1.0597 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7593 - loss: 0.6906 - val_accuracy: 0.6578 - val_loss: 0.9917 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7728 - loss: 0.6673 - val_accuracy: 0.6290 - val_loss: 1.0543 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7763 - loss: 0.6476 - val_accuracy: 0.6250 - val_loss: 1.1168 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7837 - loss: 0.6246 - val_accuracy: 0.6492 - val_loss: 1.0394 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6398 - loss: 1.0594 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.67 0.66 0.66 1000 1 0.85 0.67 0.75 1000 2 0.50 0.46 0.48 1000 3 0.45 0.50 0.47 1000 4 0.52 0.70 0.60 1000 5 0.64 0.43 0.51 1000 6 0.64 0.83 0.72 1000 7 0.83 0.61 0.70 1000 8 0.82 0.73 0.77 1000 9 0.66 0.82 0.73 1000 accuracy 0.64 10000 macro avg 0.66 0.64 0.64 10000 weighted avg 0.66 0.64 0.64 10000 Accuracy Score: 0.6408 Root Mean Square Error: 2.5538010885736577
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
| 32 | CNN_DO_MP_DO_0.5 | 0.677 | 0.653 | 0.649 | 0.932 | 1.056 | 1.071 | 83.411 |
| 33 | CNN_DO_MP_DO_0.5-1 | 0.743 | 0.649 | 0.641 | 0.735 | 1.039 | 1.065 | 96.591 |
| 34 | CNN_DO_MP_DO_0.5-2 | 0.714 | 0.657 | 0.648 | 0.824 | 1.006 | 1.036 | 85.457 |
| 35 | CNN_DO_MP_DO_0.1 | 0.758 | 0.646 | 0.641 | 0.708 | 1.019 | 1.054 | 67.886 |
| 36 | CNN_DO_MP_DO_0.1-1 | 0.744 | 0.657 | 0.646 | 0.736 | 1.001 | 1.036 | 67.855 |
| 37 | CNN_DO_MP_DO_0.1-2 | 0.776 | 0.649 | 0.641 | 0.640 | 1.039 | 1.073 | 75.070 |
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.2))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.4060 - loss: 1.6768 - val_accuracy: 0.5404 - val_loss: 1.3551 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5738 - loss: 1.2198 - val_accuracy: 0.5698 - val_loss: 1.2209 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6199 - loss: 1.0986 - val_accuracy: 0.5908 - val_loss: 1.1674 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6505 - loss: 1.0238 - val_accuracy: 0.6286 - val_loss: 1.0867 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6726 - loss: 0.9528 - val_accuracy: 0.6120 - val_loss: 1.0872 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.6873 - loss: 0.9149 - val_accuracy: 0.6320 - val_loss: 1.0552 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6993 - loss: 0.8635 - val_accuracy: 0.6536 - val_loss: 1.0075 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7141 - loss: 0.8290 - val_accuracy: 0.6386 - val_loss: 1.0375 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7227 - loss: 0.8001 - val_accuracy: 0.6312 - val_loss: 1.0585 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7327 - loss: 0.7692 - val_accuracy: 0.6430 - val_loss: 1.0169 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6497 - loss: 1.0308 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.66 0.68 0.67 1000 1 0.81 0.75 0.78 1000 2 0.51 0.55 0.53 1000 3 0.39 0.62 0.48 1000 4 0.68 0.49 0.57 1000 5 0.57 0.48 0.52 1000 6 0.80 0.66 0.72 1000 7 0.81 0.65 0.72 1000 8 0.65 0.85 0.74 1000 9 0.77 0.70 0.74 1000 accuracy 0.64 10000 macro avg 0.67 0.64 0.65 10000 weighted avg 0.67 0.64 0.65 10000 Accuracy Score: 0.6426 Root Mean Square Error: 2.5273899580397163
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.2-1'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.2))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.4125 - loss: 1.6678 - val_accuracy: 0.5238 - val_loss: 1.4067 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.5759 - loss: 1.2293 - val_accuracy: 0.5864 - val_loss: 1.1845 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6109 - loss: 1.1128 - val_accuracy: 0.6092 - val_loss: 1.1278 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6395 - loss: 1.0428 - val_accuracy: 0.6308 - val_loss: 1.0777 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6606 - loss: 0.9859 - val_accuracy: 0.6460 - val_loss: 1.0418 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6828 - loss: 0.9239 - val_accuracy: 0.6510 - val_loss: 1.0089 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.6984 - loss: 0.8699 - val_accuracy: 0.6344 - val_loss: 1.0457 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7047 - loss: 0.8481 - val_accuracy: 0.6416 - val_loss: 1.0432 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7191 - loss: 0.8191 - val_accuracy: 0.6644 - val_loss: 0.9857 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7326 - loss: 0.7755 - val_accuracy: 0.6532 - val_loss: 1.0034 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7341 - loss: 0.7627 - val_accuracy: 0.6418 - val_loss: 1.0437 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7424 - loss: 0.7433 - val_accuracy: 0.6538 - val_loss: 1.0055 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6441 - loss: 1.0293 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.67 0.63 0.65 1000 1 0.76 0.77 0.77 1000 2 0.53 0.47 0.50 1000 3 0.53 0.44 0.48 1000 4 0.53 0.65 0.59 1000 5 0.62 0.45 0.52 1000 6 0.79 0.67 0.72 1000 7 0.64 0.77 0.70 1000 8 0.68 0.81 0.73 1000 9 0.68 0.78 0.73 1000 accuracy 0.64 10000 macro avg 0.64 0.64 0.64 10000 weighted avg 0.64 0.64 0.64 10000 Accuracy Score: 0.644 Root Mean Square Error: 2.598595774644452
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.2-2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 10ms/step - accuracy: 0.3937 - loss: 1.7085 - val_accuracy: 0.5464 - val_loss: 1.3203 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5712 - loss: 1.2311 - val_accuracy: 0.5618 - val_loss: 1.2528 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6075 - loss: 1.1346 - val_accuracy: 0.6036 - val_loss: 1.1666 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6480 - loss: 1.0288 - val_accuracy: 0.6154 - val_loss: 1.1025 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6674 - loss: 0.9667 - val_accuracy: 0.6390 - val_loss: 1.0550 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6841 - loss: 0.9175 - val_accuracy: 0.6468 - val_loss: 1.0394 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7004 - loss: 0.8632 - val_accuracy: 0.6528 - val_loss: 1.0137 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7171 - loss: 0.8256 - val_accuracy: 0.6598 - val_loss: 0.9849 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7272 - loss: 0.7905 - val_accuracy: 0.6496 - val_loss: 1.0228 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7364 - loss: 0.7681 - val_accuracy: 0.6526 - val_loss: 1.0033 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7383 - loss: 0.7585 - val_accuracy: 0.6610 - val_loss: 0.9878 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7495 - loss: 0.7233 - val_accuracy: 0.6616 - val_loss: 0.9867 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7558 - loss: 0.7071 - val_accuracy: 0.6422 - val_loss: 1.0345 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7665 - loss: 0.6825 - val_accuracy: 0.6634 - val_loss: 0.9914 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7714 - loss: 0.6588 - val_accuracy: 0.6568 - val_loss: 1.0049 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7764 - loss: 0.6445 - val_accuracy: 0.6474 - val_loss: 1.0302 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7772 - loss: 0.6389 - val_accuracy: 0.6510 - val_loss: 1.0321 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6528 - loss: 1.0438 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.73 0.61 0.66 1000 1 0.71 0.82 0.76 1000 2 0.59 0.41 0.48 1000 3 0.45 0.54 0.49 1000 4 0.57 0.64 0.60 1000 5 0.63 0.46 0.53 1000 6 0.75 0.74 0.74 1000 7 0.66 0.77 0.71 1000 8 0.74 0.75 0.74 1000 9 0.69 0.75 0.72 1000 accuracy 0.65 10000 macro avg 0.65 0.65 0.64 10000 weighted avg 0.65 0.65 0.64 10000 Accuracy Score: 0.6476 Root Mean Square Error: 2.5718475849085616
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
| 32 | CNN_DO_MP_DO_0.5 | 0.677 | 0.653 | 0.649 | 0.932 | 1.056 | 1.071 | 83.411 |
| 33 | CNN_DO_MP_DO_0.5-1 | 0.743 | 0.649 | 0.641 | 0.735 | 1.039 | 1.065 | 96.591 |
| 34 | CNN_DO_MP_DO_0.5-2 | 0.714 | 0.657 | 0.648 | 0.824 | 1.006 | 1.036 | 85.457 |
| 35 | CNN_DO_MP_DO_0.1 | 0.758 | 0.646 | 0.641 | 0.708 | 1.019 | 1.054 | 67.886 |
| 36 | CNN_DO_MP_DO_0.1-1 | 0.744 | 0.657 | 0.646 | 0.736 | 1.001 | 1.036 | 67.855 |
| 37 | CNN_DO_MP_DO_0.1-2 | 0.776 | 0.649 | 0.641 | 0.640 | 1.039 | 1.073 | 75.070 |
| 38 | CNN_DO_MP_DO_0.2 | 0.724 | 0.643 | 0.643 | 0.786 | 1.017 | 1.044 | 53.498 |
| 39 | CNN_DO_MP_DO_0.2-1 | 0.738 | 0.654 | 0.644 | 0.761 | 1.005 | 1.037 | 61.789 |
| 40 | CNN_DO_MP_DO_0.2-2 | 0.772 | 0.651 | 0.648 | 0.651 | 1.032 | 1.063 | 84.308 |
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.3'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.3))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 19s 17ms/step - accuracy: 0.4042 - loss: 1.6827 - val_accuracy: 0.5320 - val_loss: 1.3442 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 8s 7ms/step - accuracy: 0.5706 - loss: 1.2410 - val_accuracy: 0.5692 - val_loss: 1.2642 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.6022 - loss: 1.1485 - val_accuracy: 0.6032 - val_loss: 1.1494 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6403 - loss: 1.0380 - val_accuracy: 0.6188 - val_loss: 1.1260 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6602 - loss: 0.9828 - val_accuracy: 0.6264 - val_loss: 1.0833 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.6756 - loss: 0.9375 - val_accuracy: 0.6418 - val_loss: 1.0296 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6868 - loss: 0.9023 - val_accuracy: 0.6328 - val_loss: 1.0720 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6987 - loss: 0.8780 - val_accuracy: 0.6438 - val_loss: 1.0387 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7023 - loss: 0.8594 - val_accuracy: 0.6450 - val_loss: 1.0180 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7146 - loss: 0.8305 - val_accuracy: 0.6466 - val_loss: 1.0282 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7204 - loss: 0.8045 - val_accuracy: 0.6446 - val_loss: 1.0153 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7285 - loss: 0.7874 - val_accuracy: 0.6510 - val_loss: 1.0036 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7373 - loss: 0.7670 - val_accuracy: 0.6434 - val_loss: 1.0240 Epoch 14/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7422 - loss: 0.7391 - val_accuracy: 0.6656 - val_loss: 0.9827 Epoch 15/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7490 - loss: 0.7225 - val_accuracy: 0.6544 - val_loss: 1.0096 Epoch 16/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7505 - loss: 0.7124 - val_accuracy: 0.6474 - val_loss: 1.0293 Epoch 17/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7515 - loss: 0.7047 - val_accuracy: 0.6610 - val_loss: 0.9923 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6565 - loss: 1.0130 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.70 0.65 0.67 1000 1 0.71 0.84 0.77 1000 2 0.55 0.43 0.48 1000 3 0.44 0.51 0.47 1000 4 0.58 0.65 0.61 1000 5 0.57 0.57 0.57 1000 6 0.71 0.79 0.74 1000 7 0.74 0.70 0.72 1000 8 0.81 0.70 0.75 1000 9 0.75 0.70 0.72 1000 accuracy 0.65 10000 macro avg 0.66 0.65 0.65 10000 weighted avg 0.66 0.65 0.65 10000 Accuracy Score: 0.652 Root Mean Square Error: 2.507827745280764
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.3-1'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.3))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.3992 - loss: 1.6992 - val_accuracy: 0.5526 - val_loss: 1.3321 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5732 - loss: 1.2241 - val_accuracy: 0.5798 - val_loss: 1.2588 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6148 - loss: 1.1091 - val_accuracy: 0.6130 - val_loss: 1.1422 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6444 - loss: 1.0298 - val_accuracy: 0.6382 - val_loss: 1.0789 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6635 - loss: 0.9740 - val_accuracy: 0.6316 - val_loss: 1.0754 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6789 - loss: 0.9265 - val_accuracy: 0.6474 - val_loss: 1.0366 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6960 - loss: 0.8844 - val_accuracy: 0.6390 - val_loss: 1.0359 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.7041 - loss: 0.8627 - val_accuracy: 0.6544 - val_loss: 1.0114 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7161 - loss: 0.8232 - val_accuracy: 0.6562 - val_loss: 1.0072 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7205 - loss: 0.8029 - val_accuracy: 0.6400 - val_loss: 1.0342 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7288 - loss: 0.7822 - val_accuracy: 0.6438 - val_loss: 1.0180 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7407 - loss: 0.7458 - val_accuracy: 0.6528 - val_loss: 0.9973 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6461 - loss: 1.0139 313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step Classification Report precision recall f1-score support 0 0.65 0.71 0.68 1000 1 0.77 0.80 0.78 1000 2 0.49 0.53 0.51 1000 3 0.47 0.49 0.48 1000 4 0.75 0.33 0.46 1000 5 0.55 0.59 0.57 1000 6 0.60 0.84 0.70 1000 7 0.74 0.72 0.73 1000 8 0.80 0.71 0.75 1000 9 0.76 0.73 0.74 1000 accuracy 0.65 10000 macro avg 0.66 0.65 0.64 10000 weighted avg 0.66 0.65 0.64 10000 Accuracy Score: 0.6458 Root Mean Square Error: 2.513404066201851
In [ ]:
# Build Model 34: CNN and MaxPool with dropouts between and after
name = 'CNN_DO_MP_DO_0.3-2'
k.clear_session()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
model.add(layers.Dropout(0.25))
model.add(layers.MaxPool2D((2, 2),strides=2))
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(units=10, activation=tf.nn.softmax))
keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
time_start = time.time()
history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
time_end = time.time()
preds = model.predict(image_test_norm)
test_pred = model.evaluate(image_test_norm, test_labels)
history_dict = history.history
history_df=pd.DataFrame(history_dict)
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
pred= model.predict(image_test_norm)
pred=np.argmax(pred, axis=1)
print_validation_report(test_labels, pred)
plot_confusion_matrix(test_labels, pred)
add_to_data(data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 9ms/step - accuracy: 0.4095 - loss: 1.6612 - val_accuracy: 0.5484 - val_loss: 1.3089 Epoch 2/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5771 - loss: 1.2244 - val_accuracy: 0.5854 - val_loss: 1.2148 Epoch 3/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6139 - loss: 1.1137 - val_accuracy: 0.6200 - val_loss: 1.1345 Epoch 4/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 6ms/step - accuracy: 0.6482 - loss: 1.0234 - val_accuracy: 0.6256 - val_loss: 1.1009 Epoch 5/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6670 - loss: 0.9640 - val_accuracy: 0.6256 - val_loss: 1.1009 Epoch 6/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6775 - loss: 0.9283 - val_accuracy: 0.6282 - val_loss: 1.0617 Epoch 7/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6926 - loss: 0.8903 - val_accuracy: 0.6552 - val_loss: 1.0266 Epoch 8/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7085 - loss: 0.8524 - val_accuracy: 0.6286 - val_loss: 1.0611 Epoch 9/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7164 - loss: 0.8196 - val_accuracy: 0.6550 - val_loss: 1.0126 Epoch 10/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7281 - loss: 0.7912 - val_accuracy: 0.6628 - val_loss: 0.9766 Epoch 11/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7339 - loss: 0.7772 - val_accuracy: 0.6474 - val_loss: 1.0190 Epoch 12/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7325 - loss: 0.7691 - val_accuracy: 0.6456 - val_loss: 1.0395 Epoch 13/200 704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7409 - loss: 0.7450 - val_accuracy: 0.6562 - val_loss: 0.9872 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6444 - loss: 1.0232 313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step Classification Report precision recall f1-score support 0 0.72 0.63 0.67 1000 1 0.83 0.72 0.77 1000 2 0.42 0.63 0.51 1000 3 0.45 0.46 0.46 1000 4 0.58 0.59 0.58 1000 5 0.54 0.54 0.54 1000 6 0.83 0.63 0.72 1000 7 0.71 0.74 0.73 1000 8 0.82 0.74 0.78 1000 9 0.74 0.75 0.74 1000 accuracy 0.64 10000 macro avg 0.66 0.64 0.65 10000 weighted avg 0.66 0.64 0.65 10000 Accuracy Score: 0.6439 Root Mean Square Error: 2.4614020394888763
In [ ]:
data_df = pd.DataFrame(data)
data_df
Out[ ]:
| model | accuracy | val_accuracy | test_accuracy | loss | val_loss | test_loss | time | |
|---|---|---|---|---|---|---|---|---|
| 0 | DNN | 0.577 | 0.476 | 0.473 | 1.229 | 1.537 | 1.530 | 54.209 |
| 1 | DNN_DNN | 0.590 | 0.477 | 0.479 | 1.225 | 1.555 | 1.541 | 66.991 |
| 2 | CNN | 0.871 | 0.604 | 0.597 | 0.405 | 1.417 | 1.462 | 45.146 |
| 3 | CNN_CNN | 0.805 | 0.618 | 0.610 | 0.565 | 1.234 | 1.269 | 77.749 |
| 4 | CNN_MP | 0.797 | 0.619 | 0.628 | 0.595 | 1.177 | 1.202 | 60.753 |
| 5 | DNN_DO | 0.582 | 0.489 | 0.483 | 1.226 | 1.529 | 1.510 | 69.229 |
| 6 | DNN_DO_DNN | 0.594 | 0.484 | 0.490 | 1.215 | 1.526 | 1.515 | 95.612 |
| 7 | CNN_DO | 0.863 | 0.611 | 0.597 | 0.407 | 1.447 | 1.500 | 68.012 |
| 8 | CNN_DO_CNN | 0.794 | 0.623 | 0.615 | 0.592 | 1.216 | 1.234 | 83.938 |
| 9 | CNN_DO_MP | 0.748 | 0.637 | 0.633 | 0.735 | 1.049 | 1.068 | 57.858 |
| 10 | DNN_DNN_DO | 0.614 | 0.491 | 0.495 | 1.162 | 1.566 | 1.542 | 120.223 |
| 11 | CNN_CNN_DO | 0.826 | 0.649 | 0.636 | 0.490 | 1.190 | 1.231 | 103.162 |
| 12 | CNN_MP_DO | 0.759 | 0.662 | 0.651 | 0.700 | 1.030 | 1.067 | 66.997 |
| 13 | DNN_DO_DNN_DO | 0.583 | 0.493 | 0.498 | 1.239 | 1.503 | 1.488 | 128.963 |
| 14 | CNN_DO_CNN_DO | 0.771 | 0.627 | 0.635 | 0.653 | 1.101 | 1.123 | 98.968 |
| 15 | DNN_BN | 0.500 | 0.392 | 0.404 | 1.449 | 1.972 | 1.922 | 25.628 |
| 16 | DNN_BN_DNN | 0.608 | 0.436 | 0.435 | 1.198 | 1.744 | 1.713 | 56.549 |
| 17 | CNN_BN | 0.747 | 0.423 | 0.431 | 0.728 | 2.650 | 2.620 | 30.352 |
| 18 | CNN_BN_CNN | 0.927 | 0.566 | 0.564 | 0.212 | 2.358 | 2.438 | 56.151 |
| 19 | CNN_BN_MP | 0.808 | 0.447 | 0.440 | 0.542 | 2.857 | 2.906 | 55.307 |
| 20 | DNN_DNN_BN | 0.501 | 0.423 | 0.423 | 1.453 | 1.640 | 1.616 | 68.419 |
| 21 | CNN_CNN_BN | 0.809 | 0.562 | 0.559 | 0.555 | 1.565 | 1.573 | 74.480 |
| 22 | CNN_MP_BN | 0.760 | 0.597 | 0.590 | 0.694 | 1.343 | 1.391 | 39.724 |
| 23 | DNN_BN_DNN_BN | 0.527 | 0.461 | 0.470 | 1.429 | 1.592 | 1.562 | 66.279 |
| 24 | CNN_BN_CNN_BN | 0.802 | 0.541 | 0.551 | 0.565 | 1.843 | 1.793 | 108.790 |
| 25 | DNN_DO_DNN_BN | 0.556 | 0.457 | 0.459 | 1.287 | 1.607 | 1.591 | 95.676 |
| 26 | DNN_BN_DNN_DO | 0.581 | 0.465 | 0.471 | 1.253 | 1.702 | 1.665 | 72.696 |
| 27 | CNN_BN_CNN_DO | 0.915 | 0.615 | 0.619 | 0.245 | 1.762 | 1.807 | 67.884 |
| 28 | CNN_DO_CNN_BN | 0.759 | 0.532 | 0.542 | 0.681 | 1.536 | 1.542 | 67.479 |
| 29 | CNN_DO_DNN | 0.891 | 0.595 | 0.592 | 0.378 | 1.570 | 1.642 | 121.519 |
| 30 | DNN_DO_CNN | 0.719 | 0.577 | 0.577 | 0.813 | 1.291 | 1.292 | 128.007 |
| 31 | CNN_DO_MP_DO | 0.755 | 0.659 | 0.658 | 0.699 | 1.000 | 1.025 | 76.572 |
| 32 | CNN_DO_MP_DO_0.5 | 0.677 | 0.653 | 0.649 | 0.932 | 1.056 | 1.071 | 83.411 |
| 33 | CNN_DO_MP_DO_0.5-1 | 0.743 | 0.649 | 0.641 | 0.735 | 1.039 | 1.065 | 96.591 |
| 34 | CNN_DO_MP_DO_0.5-2 | 0.714 | 0.657 | 0.648 | 0.824 | 1.006 | 1.036 | 85.457 |
| 35 | CNN_DO_MP_DO_0.1 | 0.758 | 0.646 | 0.641 | 0.708 | 1.019 | 1.054 | 67.886 |
| 36 | CNN_DO_MP_DO_0.1-1 | 0.744 | 0.657 | 0.646 | 0.736 | 1.001 | 1.036 | 67.855 |
| 37 | CNN_DO_MP_DO_0.1-2 | 0.776 | 0.649 | 0.641 | 0.640 | 1.039 | 1.073 | 75.070 |
| 38 | CNN_DO_MP_DO_0.2 | 0.724 | 0.643 | 0.643 | 0.786 | 1.017 | 1.044 | 53.498 |
| 39 | CNN_DO_MP_DO_0.2-1 | 0.738 | 0.654 | 0.644 | 0.761 | 1.005 | 1.037 | 61.789 |
| 40 | CNN_DO_MP_DO_0.2-2 | 0.772 | 0.651 | 0.648 | 0.651 | 1.032 | 1.063 | 84.308 |
| 41 | CNN_DO_MP_DO_0.3 | 0.749 | 0.661 | 0.652 | 0.719 | 0.992 | 1.030 | 108.863 |
| 42 | CNN_DO_MP_DO_0.3-1 | 0.732 | 0.653 | 0.646 | 0.771 | 0.997 | 1.029 | 64.171 |
| 43 | CNN_DO_MP_DO_0.3-2 | 0.737 | 0.656 | 0.644 | 0.755 | 0.987 | 1.041 | 66.907 |
In [ ]:
plt.subplots(figsize=(16,12))
plt.tight_layout()
display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
In [ ]:
pred_final= model.predict(image_test_norm)
pred_final=np.argmax(pred_final, axis=1)
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
In [ ]:
def print_validation_report(y_test, predictions):
print("Classification Report")
print(classification_report(y_test, predictions))
print('Accuracy Score: {}'.format(accuracy_score(y_test, predictions)))
print('Root Mean Square Error: {}'.format(np.sqrt(MSE(y_test, predictions))))
def plot_confusion_matrix(y_true, y_pred):
mtx = confusion_matrix(y_true, y_pred)
fig, ax = plt.subplots(figsize=(16,12))
sns.heatmap(mtx, annot=True, fmt='d', linewidths=.75, cbar=False, ax=ax,cmap='Blues',linecolor='white')
# square=True,
plt.ylabel('true label')
plt.xlabel('predicted label')
In [ ]:
print_validation_report(test_labels, pred_final)
Classification Report
precision recall f1-score support
0 0.81 0.76 0.79 1000
1 0.91 0.85 0.88 1000
2 0.67 0.67 0.67 1000
3 0.54 0.67 0.60 1000
4 0.70 0.73 0.71 1000
5 0.75 0.57 0.65 1000
6 0.72 0.88 0.79 1000
7 0.91 0.65 0.76 1000
8 0.84 0.88 0.86 1000
9 0.79 0.89 0.84 1000
accuracy 0.75 10000
macro avg 0.76 0.75 0.75 10000
weighted avg 0.76 0.75 0.75 10000
Accuracy Score: 0.7536
Root Mean Square Error: 2.0098507407267836
In [ ]: